import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()  # comment out this line to disable Eager Execution
class MNISTModel(tfe.Network):
    def __init__(self):
        super(MNISTModel, self).__init__()
        # track_layer registers each layer with the network so that its
        # variables are collected once the layer is built.
        self.layer1 = self.track_layer(tf.layers.Dense(units=10))
        self.layer2 = self.track_layer(tf.layers.Dense(units=10))

    def call(self, input):
        """Actually runs the model."""
        result = self.layer1(input)
        result = self.layer2(result)
        return result
model = MNISTModel()

# Let's make up a blank input image
batch = tf.zeros([1, 1, 784])
print(batch.shape)
# (1, 1, 784)
result = model(batch)
print(result)
# tf.Tensor([[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]], shape=(1, 1, 10), dtype=float32)
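# The layers are built lazily on the first call, so the tracked variables
# only exist after the forward pass above. A quick sanity check (assuming
# tfe.Network's `variables` property, which collects from tracked layers):
print(len(model.variables))
# 4 -- a kernel and a bias for each of the two Dense layers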
def loss_function(model, x, y):
    y_ = model(x)  # raw logits from the model
    return tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_)
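# Sanity-checking the loss on a blank batch: with zero input, both Dense
# layers output their zero-initialized biases, so the logits are all zero,
# the softmax is uniform, and the cross-entropy should be about ln(10) ≈ 2.3026.
# The one-hot label here is made up purely for illustration.
fake_label = tf.one_hot([3], depth=10)
print(loss_function(model, tf.zeros([1, 784]), fake_label))
# ≈ tf.Tensor([2.3025851], shape=(1,), dtype=float32)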
# If we're not sure whether a GPU is available, the CPU is always a safe choice
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
for (x, y) in tfe.Iterator(dataset):
    grads = tfe.implicit_gradients(loss_function)(model, x, y)
    optimizer.apply_gradients(grads)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-6-8fd83e3a491a> in <module>()
      1 optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
----> 2 for (x, y) in tfe.Iterator(dataset):
      3     grads = tfe.implicit_gradients(loss_function)(model, x, y)
      4     optimizer.apply_gradients(grads)

NameError: name 'dataset' is not defined
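# The NameError just says that `dataset` was never defined anywhere above.
# A minimal sketch that makes the loop runnable, substituting random
# placeholder tensors for a real MNIST input pipeline (the array shapes and
# batch size here are assumptions for illustration):
import numpy as np

images = np.random.rand(32, 784).astype(np.float32)
labels = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=32)]
dataset = tf.data.Dataset.from_tensor_slices((images, labels)).batch(8)
# With `dataset` defined, re-running the loop above updates the model
# weights one batch at a time.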
# if we have a GPU
with tf.device("/gpu:0"):
    for (x, y) in tfe.Iterator(dataset):
        optimizer.minimize(lambda: loss_function(model, x, y))
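# Rather than hard-coding the device, one option is to pick it at runtime.
# A sketch assuming tfe.num_gpus() is available in this contrib build:
device = "/gpu:0" if tfe.num_gpus() > 0 else "/cpu:0"
with tf.device(device):
    for (x, y) in tfe.Iterator(dataset):
        optimizer.minimize(lambda: loss_function(model, x, y))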